Vehicle Detection

  1. NOTE: Requires OpenCV 3.2 and opencv-contrib (pip install opencv-contrib-python)
  2. ToC will be generated in the first code block
  3. ToC will become incrementally active as cells are executed
In [1]:
%%HTML
<!--
HTML code to create the sidebar, menu and independent popup window.
Inspiration: https://github.com/vizmotion/jupyter-navigation
-->
<style>
    .yourDiv {position: fixed;top: 100px; left: 0px; background: white;height: 100%;width: 150px; padding: 20px; z-index: 10000}
</style>
<script>
function showthis(url) {
	window.open(url, "pres", "toolbar=yes,scrollbars=yes,resizable=yes,top=10,left=400,width=500,height=500");
	return(false);
}
</script>

<div class=yourDiv>
    <a href=#loc_settings>Run Settings</a><br>
    <a href=#loc_part1_extraction_pipeline>Part 1: Extraction Pipeline</a><br>
    <a href=#loc_classifier_training>Train Classifier</a><br>
    <a href=#loc_part2_load_classifier>Part 2: Load classifier</a><br>
    <a href=#loc_window_search>Window Search</a><br>
    <a href=#loc_tracker>Tracker</a><br>
    <a href=#loc_apply_pipeline>Apply pipeline to Video</a><br>
</div>
In [2]:
%%HTML
<a name="loc_settings"></a>

Run Settings

In [3]:
# --- Run settings: toggle which parts of the notebook execute on a full run ---
SETTING_RETRAIN_CLASSIFIER = True         # Change this to retrain the classifier
SETTING_SAVE_RETRAINED_CLASSIFIER = True   # Change this to save the retrained classifier (if retraining)
SETTING_LOAD_TRAINED_CLASSIFIER = True     # Change this in case SETTING_SAVE_RETRAINED_CLASSIFIER and you want to use the freshly trained classifier without overwriting the one on disc
SETTING_TEST_CLASSIFIER_ON_TEST_IMAGES = True  # Change this to show how the classifier works on test images

Imports

----------------------

Note: Uses OpenCV 3.2 with Contrib (pip install opencv-contrib-python)

----------------------

In [4]:
# SKLearn
import sklearn
from sklearn import preprocessing
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC, NuSVC
from sklearn import svm
from skimage.feature import hog
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix, classification_report


# MatPlotLib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline

# The rest
import numpy as np
from scipy.ndimage.measurements import label
import cv2
from glob import glob
import random
import csv
import time
import os
from importlib import reload
import multiprocessing as multiprocessing

# Custom code 
from common_geometry import Rect, Point
from window_slider import WindowSlider, PartitioningWindowSlider, PartitioningWindowSliderGroup
from visualization_utils import draw_boxes, draw_rects
/Users/rbaron/miniconda3/envs/carnd-term1b/lib/python3.5/site-packages/sklearn/cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
  "This module will be removed in 0.20.", DeprecationWarning)
In [5]:
%%HTML
<a name="loc_part1_extraction_pipeline"></a>

Part 1: Define Feature Extraction Pipeline

In [6]:
def convert_color(img, color_space='LUV'):
    """Convert an RGB image into the requested color space.

    Args:
        img: RGB image (numpy array, H x W x 3).
        color_space: one of 'RGB', 'HSV', 'LUV', 'HLS', 'YUV', 'YCrCb'.

    Returns:
        The converted image; for 'RGB' a copy of the input.

    Raises:
        ValueError: if color_space is not one of the supported names.
        (BUGFIX: the original fell through with an unbound local and raised
        a confusing UnboundLocalError for unknown color spaces.)
    """
    if color_space == 'RGB':
        return np.copy(img)
    supported = ('HSV', 'LUV', 'HLS', 'YUV', 'YCrCb')
    if color_space not in supported:
        raise ValueError("Unsupported color space: {!r}".format(color_space))
    # cv2 exposes one conversion code per target space: COLOR_RGB2<space>
    return cv2.cvtColor(img, getattr(cv2, 'COLOR_RGB2' + color_space))

def get_hog_features(img, 
                     orient,
                     pix_per_cell, 
                     cell_per_block,
                     vis=False, 
                     feature_vec=True):
    """Compute HOG features for a single-channel image.

    Args:
        img: single-channel image.
        orient: number of gradient orientation bins.
        pix_per_cell: cell edge length in pixels.
        cell_per_block: block edge length in cells.
        vis: when True, hog() also returns a visualization image.
        feature_vec: when True, features are flattened to a 1-D vector.

    Returns:
        The HOG feature array, or (features, hog_image) when vis is True
        (skimage's hog already returns that pair for visualise=True, so the
        result can be passed through unchanged).
    """
    return hog(img,
               orientations=orient,
               pixels_per_cell=(pix_per_cell, pix_per_cell),
               cells_per_block=(cell_per_block, cell_per_block),
               transform_sqrt=False,
               visualise=vis,
               feature_vector=feature_vec,
               block_norm='L2')

def bin_spatial(img, size=(32, 32)):
    """Downsample each of the three channels to `size` and concatenate
    the flattened results into one spatial-binning feature vector."""
    flattened = [
        cv2.resize(img[:, :, ch], size, interpolation=cv2.INTER_LINEAR).ravel()
        for ch in range(3)
    ]
    return np.hstack(flattened)
                        
def color_hist(img, nbins=32, bins_range=(0, 256)):
    """Histogram each of the three color channels independently and
    concatenate the bin counts into one feature vector of length 3*nbins."""
    counts_per_channel = [
        np.histogram(img[:, :, ch], bins=nbins, range=bins_range)[0]
        for ch in range(3)
    ]
    return np.concatenate(counts_per_channel)
In [7]:
IMG_SHAPE = (64, 64)  # canonical training-patch size fed to the classifier

class FeatureExtractorConfig:
    """All tunable parameters of the feature-extraction pipeline in one place."""

    def __init__(self):
        # Color representation used for every feature type below
        self.color_space = 'LUV' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
        # Spatial binning
        self.spatial_size = (48, 48) # Spatial binning dimensions
        self.spatial_feat = True # Spatial features on or off
        # Color histogram
        self.hist_bins = 64    # Number of histogram bins
        self.hist_feat = True # Histogram features on or off
        # Histogram of oriented gradients
        self.orient = 12  # HOG orientations
        self.pix_per_cell = 8 # HOG pixels per cell
        self.cell_per_block = 1 # HOG cells per block
        self.hog_channel = 'ALL' # Can be 0, 1, 2, or "ALL"
        self.hog_feat = True # HOG features on or off

    def print(self):
        """Echo the HOG parameters for quick visual confirmation in the notebook."""
        print('Using: {} orientations; {} pixels per cell; and {} cells per block'.format(
            self.orient, 
            self.pix_per_cell, 
            self.cell_per_block))
In [8]:
class FeatureExtractor:
    """Turns one RGB image into a 1-D feature vector for the classifier.

    Which features are produced (spatial bins, color histogram, HOG) and
    with what parameters is driven by the FeatureExtractorConfig given at
    construction time.
    """

    def __init__(self, config):
        # config: FeatureExtractorConfig holding all extraction parameters
        self.config = config

    def extract_features(self, img):
        """Extract the configured features from `img`.

        Returns a single 1-D numpy array with all enabled feature groups
        concatenated in a fixed order (raw pixels, spatial, histogram, HOG).
        """
        # Resize to target spatial size
        img_resize = cv2.resize(img, self.config.spatial_size, interpolation=cv2.INTER_LINEAR)
        # Gamma-style normalization: sqrt compresses bright regions (input assumed 8-bit)
        img_resize = (np.sqrt(img_resize.astype(np.float32)/255)*255).astype(np.uint8)
        file_features = []
        # Color space conversion
        feature_image = convert_color(img_resize, color_space=self.config.color_space)
        # NOTE(review): the raw raveled image is itself appended as a feature;
        # this dominates the feature-vector length — confirm it is intentional.
        file_features.append(feature_image.ravel())
        # Spatial color binning
        if self.config.spatial_feat:
            file_features.append(bin_spatial(feature_image, size=self.config.spatial_size))
        # Color histogram
        if self.config.hist_feat:
            file_features.append(color_hist(feature_image, nbins=self.config.hist_bins))
        # Histogram of oriented gradients
        if self.config.hog_feat:
            if self.config.hog_channel == 'ALL':
                hog_features = []
                for channel in range(feature_image.shape[2]):
                    hog_features.append(get_hog_features(feature_image[:,:,channel],
                                                         self.config.orient,
                                                         self.config.pix_per_cell,
                                                         self.config.cell_per_block))
                hog_features = np.ravel(hog_features)
            else:
                hog_features = get_hog_features(feature_image[:,:,self.config.hog_channel],
                                                self.config.orient,
                                                self.config.pix_per_cell,
                                                self.config.cell_per_block)
            # BUGFIX: this append previously sat OUTSIDE the `if`, which raised
            # a NameError whenever hog_feat was disabled.
            file_features.append(hog_features)
        return np.concatenate(file_features)
In [9]:
%%HTML
<a name="loc_classifier_training"></a>

Load training data for the classifier

In [10]:
class TrainingImagesConfig:
    """File lists and per-class sample counts for the classifier's training data."""

    def __init__(self):
        # Provided labeled datasets (PNG patches)
        self.vehicle_fnames = glob('dataset/vehicles/*/*.png')
        self.non_vehicle_fnames = glob('dataset/non-vehicles/*/*.png')
        # Hard negatives mined from earlier runs of the pipeline
        self.non_vehicle_mined_fnames = glob('dataset/non-vehicles-mined/*.png')
        # Balanced sampling: same count from each class
        self.vehicle_samples = self.non_vehicle_samples = 8790

class TrainingImageLoader:
    """Loads, normalizes and resizes the training images from disk in parallel."""

    def __init__(self, config):
        # config: TrainingImagesConfig with file-name lists and sample counts
        self.config = config
        # Boolean flag; flips to True once load() has completed.
        # BUGFIX: the former is_loaded() *method* was shadowed by this
        # attribute and therefore uncallable — it has been removed.
        self.is_loaded = False

    def load(self):
        """Read all training images into memory and prepare them for feature extraction."""
        # Sample from file names. We don't subsample hard mined images deliberately.
        vehicle_fnames = random.sample(self.config.vehicle_fnames, self.config.vehicle_samples)
        non_vehicle_fnames = random.sample(self.config.non_vehicle_fnames, self.config.non_vehicle_samples)

        with multiprocessing.Pool() as pool:
            # Load each image into memory
            self.car_images = pool.map(mpimg.imread, vehicle_fnames)
            self.non_car_images = pool.map(mpimg.imread, non_vehicle_fnames)
            self.non_car_mined_images = pool.map(mpimg.imread, self.config.non_vehicle_mined_fnames)

            # Convert to 8-bit channels
            self.car_images = pool.map(TrainingImageLoader.convert_to_8_bit, self.car_images)
            self.non_car_images = pool.map(TrainingImageLoader.convert_to_8_bit, self.non_car_images)
            self.non_car_mined_images = pool.map(TrainingImageLoader.convert_to_8_bit, self.non_car_mined_images)

            # Hard mined non-car images need to be resized to the training shape
            self.non_car_mined_images = pool.map(TrainingImageLoader.resize_to_expected_shape, self.non_car_mined_images)

        self.is_loaded = True

    # BUGFIX: both helpers are invoked unbound (TrainingImageLoader.fn) from
    # pool.map, so they are proper staticmethods now instead of working only
    # by accident of Python 3 unbound-call semantics.
    @staticmethod
    def resize_to_expected_shape(image):
        """Resize an image to the canonical training patch size."""
        return cv2.resize(image, IMG_SHAPE)

    @staticmethod
    def convert_to_8_bit(image):
        """Scale by the image max so float [0,1] and 8-bit inputs both become uint8."""
        return (image.astype(np.float32)/np.max(image)*255).astype(np.uint8)


# Load the labeled car / non-car images once so later cells can reuse them.
if SETTING_RETRAIN_CLASSIFIER:

    print("Loading images - Starting")
    training_images_config = TrainingImagesConfig()
    training_img_loader = TrainingImageLoader(training_images_config)
    training_img_loader.load()
    print("Loading images - Done")

    
Loading images - Starting
Loading images - Done

Extract features

In [11]:
class TrainingAndTestSet:
    """Builds scaled feature matrices and a train/test split from labeled images."""

    def __init__(self, 
                 car_images,
                 non_car_images, 
                 test_size, 
                 feature_extractor):
        # car_images / non_car_images: lists of images (positives / negatives)
        # test_size: fraction of samples held out for testing (e.g. 0.2)
        # feature_extractor: FeatureExtractor used to vectorize each image
        assert car_images is not None, "Images must be loaded prior to TrainingAndTestSet initialization"
        assert non_car_images is not None, "Images must be loaded prior to TrainingAndTestSet initialization"
        assert feature_extractor is not None, "Must provide feature extractor"
        
        self.car_images = car_images
        self.non_car_images = non_car_images
        self.test_size = test_size
        self.feature_extractor = feature_extractor
        
    def load(self):
        """Extract features, scale them, and split into train/test sets.

        Populates self.X_scaler, self.X_train, self.X_test, self.y_train, self.y_test.
        """
        # Extract features in parallel (one feature vector per image)
        with multiprocessing.Pool() as pool:
            car_features = pool.map(self.feature_extractor.extract_features, self.car_images)
            non_car_features = pool.map(self.feature_extractor.extract_features, self.non_car_images)
        
        # Create labels: 1 = car, 0 = non-car
        X_cars = np.vstack(car_features)
        y_cars = np.ones(X_cars.shape[0], dtype=np.uint8)
        X_non_cars = np.vstack(non_car_features)
        y_non_cars = np.zeros(X_non_cars.shape[0], dtype=np.uint8)
        
        print('{} cars & {} non-cars labeled'.format(len(X_cars), len(X_non_cars)))

        # Join the training vector and labels
        X = np.vstack((X_cars, X_non_cars))
        y = np.concatenate((y_cars, y_non_cars))
        
        # Normalize the training vectors: fit a per-column scaler and keep it
        # so the same scaling can be applied again at prediction time
        self.X_scaler = StandardScaler().fit(X)

        # Apply the scaler to X
        scaled_X = self.X_scaler.transform(X)
        
        # Split up data into randomized training and test sets
        # NOTE(review): the seed itself is random, so splits are not
        # reproducible across runs — consider a fixed random_state.
        rand_state = np.random.randint(0, 100)
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            scaled_X, 
            y, 
            test_size=self.test_size, 
            random_state=rand_state
        )
    
def retrain_classifier():
    """Extract features from the loaded images and build the train/test split.

    Relies on the module-level `training_img_loader` populated earlier.
    Despite the name it returns the ready TrainingAndTestSet; the classifier
    itself is trained in a later cell.
    """
    print("Extracting features and creating test set - Starting")

    feature_extractor_config = FeatureExtractorConfig()
    feature_extractor_config.print()
    feature_extractor = FeatureExtractor(feature_extractor_config)
    # The mined hard negatives are folded into the regular negatives
    all_non_car_images = training_img_loader.non_car_images + training_img_loader.non_car_mined_images
    training_and_test_set = TrainingAndTestSet(training_img_loader.car_images,
                                               all_non_car_images,
                                               0.2,
                                               feature_extractor
                                              )
    training_and_test_set.load()
    print("Extracting features and creating test set - Done")
    return training_and_test_set


if SETTING_RETRAIN_CLASSIFIER:
    training_and_test_set = retrain_classifier()
Extracting features and creating test set - Starting
Using: 12 orientations; 8 pixels per cell; and 1 cells per block
8790 cars & 8887 non-cars labeled
Extracting features and creating test set - Done

Train the classifier

Grid search (commented out because we have a good parameter combination now)

In [12]:
# After trying a (nearly) infinite number of kernels via grid search... I settled on the best performing one
#SEARCH_C_VALUES=[0.0001]
#SEARCH_KERNEL_VALUES=['linear'] 
#SEARCH_GAMMA_VALUES=[0.00001]
#parameters = {'kernel': SEARCH_KERNEL_VALUES, 'C': SEARCH_C_VALUES, 'gamma': SEARCH_GAMMA_VALUES}
#
# def optimize_param(parameters):
#     svc = GridSearchCV(svm.SVC(), parameters)
#     # Check the training time for the SVC
#     t=time.time()
#     svc.fit(X_train, y_train)
#     t2 = time.time()
#     print(round(t2-t, 2), 'Seconds to train SVC...')
#     optimal_params = svc.best_params_
#     print("Optimal parameters found: ", optimal_params, "\n")
#     # Check the score of the SVC
#     test_score = round(svc.score(X_test, y_test), 4)
#     print('Test Accuracy of SVC = ', test_score)
#     # Check the prediction time for a single sample
#     t=time.time()

Final training

In [13]:
class VehicleClassifier:
    """Bundles a trained SVM, its feature extractor and its feature scaler."""

    def __init__(self, feature_extractor, svca, X_scaler):
        self.feature_extractor = feature_extractor  # FeatureExtractor instance
        self.svca = svca                            # trained sklearn classifier
        self.X_scaler = X_scaler                    # StandardScaler fitted on the training data

    def is_vehicle(self, image):
        """Return True (class 1) if the classifier judges `image` to be a vehicle."""
        # Bring the patch to the canonical training size
        resized_img = cv2.resize(image, IMG_SHAPE)
        # Extract features for that window
        features = self.feature_extractor.extract_features(resized_img)
        # BUGFIX: scale and predict through `self`, not through the
        # module-level `classifier` global this method previously
        # (accidentally) referenced — that made instances unusable unless a
        # global of that exact name happened to exist.
        scaled_features = self.X_scaler.transform(np.array(features).reshape(1, -1))
        prediction = self.svca.predict(scaled_features)
        # Vehicle is class 1
        return prediction == 1
        

class ClassifierTrainer:
    """Trains a LinearSVC on a TrainingAndTestSet and reports test metrics."""

    # BUGFIX: both methods are called on the class (ClassifierTrainer.train(...))
    # and take no self — they are proper staticmethods now.
    @staticmethod
    def train(training_and_test_set):
        """Fit a linear SVM on the training split and wrap it in a VehicleClassifier.

        Prints timing and test accuracy per batch; returns the VehicleClassifier.
        """
        print('Feature vector length:', len(training_and_test_set.X_train[0]))

        # C and kernel were selected via the (commented-out) grid search above
        svca = LinearSVC(C=0.0001, dual=True, max_iter=10)

        # Batch machinery kept for experimentation; //1 means a single batch.
        batch_size = len(training_and_test_set.X_train)//1
        print('training on {} samples, {} batches, each batch {} samples'.format(
            len(training_and_test_set.X_train),
            len(training_and_test_set.X_train)//batch_size,
            batch_size))

        for batch in range(0, len(training_and_test_set.X_train)//batch_size):
            t = time.time()
            svca.fit(
                training_and_test_set.X_train[batch*batch_size:(batch+1)*batch_size],
                training_and_test_set.y_train[batch*batch_size:(batch+1)*batch_size]
            )
            t2 = time.time()

            # Check the score of the SVC on the held-out test split
            test_score = round(svca.score(training_and_test_set.X_test, training_and_test_set.y_test), 4)
            print('Batch {}; seconds to train {}; test accuracy {}'.format(batch+1, round(t2-t, 2), test_score))

        return VehicleClassifier(training_and_test_set.feature_extractor,
                                 svca,
                                 training_and_test_set.X_scaler)

    @staticmethod
    def print_stats(classifier, training_and_test_set):
        """Print per-class precision/recall/F1 for the test split."""
        predict = classifier.svca.predict(training_and_test_set.X_test)
        labels = training_and_test_set.y_test
        print(classification_report(labels, predict))


# Train the classifier on the freshly built feature set and show its metrics
if SETTING_RETRAIN_CLASSIFIER:
    classifier = ClassifierTrainer.train(training_and_test_set)
    print("Training complete. Statistics: ")
    ClassifierTrainer.print_stats(classifier, training_and_test_set)
Feature vector length: 15312
training on 14141 samples, 1 batches, each batch 14141 samples
Batch 1; seconds to train 7.28; test accuracy 0.9941
Training complete. Statistics: 
             precision    recall  f1-score   support

          0       0.99      1.00      0.99      1789
          1       1.00      0.99      0.99      1747

avg / total       0.99      0.99      0.99      3536

Save the classifier if we're happy with it

In [14]:
CLASSIFIER_SAVE_PATH = 'intermediates/car_classifier_with_mined.pkl'
SCALER_SAVE_PATH = 'intermediates/car_scaler_with_mined.pkl'

# Persist the freshly trained model and its feature scaler for later reuse
if SETTING_SAVE_RETRAINED_CLASSIFIER and SETTING_RETRAIN_CLASSIFIER:
    joblib.dump(classifier.svca, CLASSIFIER_SAVE_PATH)
    joblib.dump(classifier.X_scaler, SCALER_SAVE_PATH)
In [15]:
%%HTML
<a name="loc_part2_load_classifier"></a>

Part 2: Load the saved classifier

In [16]:
CLASSIFIER_LOAD_PATH = 'intermediates/car_classifier_with_mined.pkl'
SCALER_LOAD_PATH = 'intermediates/car_scaler_with_mined.pkl'

def load_classifier(classifier_path, scaler_path):
    """Load a pickled SVM and scaler from disk and wrap them in a VehicleClassifier.

    Args:
        classifier_path: path to the joblib-pickled classifier.
        scaler_path: path to the joblib-pickled StandardScaler.

    Returns:
        A ready-to-use VehicleClassifier.
    """
    svca = joblib.load(classifier_path)
    if svca is not None:
        print('Classifier loaded successfully:')
        print(svca)
    X_scaler = joblib.load(scaler_path)
    if X_scaler is not None:
        print('Scaler loaded successfully:')
        print(X_scaler)
    # WARNING!!!! Config must match saved classifier config!
    # TODO: Persistence of classifier should also save config. Same for loading
    feature_extractor_config = FeatureExtractorConfig()
    feature_extractor = FeatureExtractor(feature_extractor_config)
    classifier = VehicleClassifier(feature_extractor, svca, X_scaler)
    return classifier

if SETTING_LOAD_TRAINED_CLASSIFIER:
    classifier = load_classifier(CLASSIFIER_LOAD_PATH, SCALER_LOAD_PATH)
   
Classifier loaded successfully:
LinearSVC(C=0.0001, class_weight=None, dual=True, fit_intercept=True,
     intercept_scaling=1, loss='squared_hinge', max_iter=10,
     multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
     verbose=0)
Scaler loaded successfully:
StandardScaler(copy=True, with_mean=True, with_std=True)
In [17]:
%%HTML
<a name="loc_window_search"></a>

Define the window search and conversion to bounding boxes

In [27]:
# WINDOW_SEARCH_RANGES = [
# #      {'window_size': (256,256), 'x_start_stop': [None, None], 'y_start_stop': [360, 740], 'xy_overlap':(0.8, 0.8)},
#      {'window_size': (192,192), 'x_start_stop': [None, None], 'y_start_stop': [360, 740], 'xy_overlap':(0.8, 0.8)},
#      {'window_size': (128,128), 'x_start_stop': [50, 1300], 'y_start_stop': [360, 600], 'xy_overlap':(0.8, 0.8)},
#      {'window_size': (64,64), 'x_start_stop': [200, 1200], 'y_start_stop': [360, 500], 'xy_overlap':(0.85, 0.85)},
# ]

# Multi-scale sliding-window search configuration. Each entry defines one
# window size plus the pixel region of the frame it scans and the x/y
# overlap between adjacent windows. Smaller windows search a narrower band
# (presumably where distant cars appear — confirm against the test images).
WINDOW_SEARCH_RANGES = [
#      {'window_size': (256,256), 'x_start_stop': [None, None], 'y_start_stop': [360, 740], 'xy_overlap':(0.7, 0.7)},
    {'window_size': (192,192), 'x_start_stop': [0, 1280], 'y_start_stop': [360, 740], 'xy_overlap':(0.80, 0.90)},
    {'window_size': (160,160), 'x_start_stop': [0, 1280], 'y_start_stop': [300, 740], 'xy_overlap':(0.80, 0.90)},
    {'window_size': (128,128), 'x_start_stop': [0, 1280], 'y_start_stop': [350, 700], 'xy_overlap':(0.80, 0.90)},
    {'window_size': (96,96), 'x_start_stop': [200, 1200], 'y_start_stop': [400, 600], 'xy_overlap':(0.80, 0.90)},
    {'window_size': (64,64), 'x_start_stop': [200, 1200], 'y_start_stop': [400, 600], 'xy_overlap':(0.70, 0.80)},
]


# def get_all_windows(image):
#     all_windows = []
#     for search_range in WINDOW_SEARCH_RANGES:
#         windows = slide_window(xy_window=search_range['window_size'],
#                                x_start_stop=search_range['x_start_stop'], 
#                                y_start_stop=search_range['y_start_stop'], 
#                                xy_overlap=search_range['xy_overlap'])
#         all_windows.extend(windows)
#     return all_windows
class ImageWindowSearch:
    """Classifies sliding windows of a single frame as vehicle / non-vehicle."""

    def __init__(self, image, classifier):
        # image: full RGB frame to search
        # classifier: object exposing is_vehicle(patch) -> truthy/falsy
        self.image = image
        self.classifier = classifier

    def search_windows(self, windows, process_pool):
        """Return the subset of `windows` the classifier flags as vehicles.

        Args:
            windows: iterable of ((x1, y1), (x2, y2)) pixel rectangles.
            process_pool: object with a map() method used to parallelize the checks.
        """
        window_check_results = process_pool.map(self.check_window, windows)
        # Keep only the positively classified windows
        return [window for is_car, window in window_check_results if is_car]

    def check_window(self, window):
        """Classify one ((x1, y1), (x2, y2)) window.

        Returns (is_vehicle, window); windows that fall outside the image or
        produce an empty crop are rejected without classification.
        """
        (x1, y1), (x2, y2) = window
        if x1 < 0 or y1 < 0 or x2 < 0 or y2 < 0:
            return (False, window)
        window_img = self.image[y1:y2, x1:x2]
        if window_img.shape[0] == 0 or window_img.shape[1] == 0:
            return (False, window)
        return (self.classifier.is_vehicle(window_img), window)


def visualize_search_area():
    """Plot each search range's sliding windows over a sample test image.

    One subplot per entry in WINDOW_SEARCH_RANGES; the first window of each
    range is drawn in magenta so the window size is easy to judge.
    """
    # Load an image
    files = glob('test_images/test*.jpg')
    image = mpimg.imread(files[0])

    # Plot
    f, axes_arr = plt.subplots(len(WINDOW_SEARCH_RANGES), figsize=(24, 30))
    f.tight_layout()
    for idx, ax in enumerate(axes_arr):
        search_range = WINDOW_SEARCH_RANGES[idx]

        # n_partitions=1 -> this slider yields all its windows at once
        slider = PartitioningWindowSlider( 
            x_start_stop=search_range['x_start_stop'], 
            y_start_stop=search_range['y_start_stop'], 
            xy_window=search_range['window_size'],
            xy_overlap=search_range['xy_overlap'], 
            n_partitions=1)
        
        windows = slider.get_all_windows()
        
        image_with_bboxes = draw_boxes(image, windows)
        # Highlight one window in magenta to show the window size
        image_with_bboxes = draw_boxes(image_with_bboxes, [windows[0]], (255, 0, 255), thick=4)
        ax.set_title("Search range {}".format(search_range))
        ax.imshow(image_with_bboxes)

    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.1, hspace=0.2)
    plt.show()
    

visualize_search_area()

Visualize partitioning the search over N frames

In [28]:
def visualize_partitioned_search_area(n_frames, n_examples_skip, n_examples_show):
    """Show which subset of windows gets searched on each of several frames.

    Every search range's windows are split into `n_frames` partitions (one
    per video frame); this renders `n_examples_show` consecutive partitions
    starting at partition index `n_examples_skip`.
    """
    # Load an image
    files = glob('test_images/test*.jpg')
    image = mpimg.imread(files[0])

    # One slider per search range, each splitting its windows into n_frames partitions
    sliders = []
    for search_range in WINDOW_SEARCH_RANGES:
        slider = PartitioningWindowSlider( 
            x_start_stop=search_range['x_start_stop'], 
            y_start_stop=search_range['y_start_stop'], 
            xy_window=search_range['window_size'],
            xy_overlap=search_range['xy_overlap'], 
            n_partitions=n_frames)
        sliders.append(slider)

    # Plot
    f, axes_arr = plt.subplots(n_examples_show, figsize=(24, 30))
    f.tight_layout()
    for idx, ax in enumerate(axes_arr):
        # Collect the windows every slider contributes to this partition
        partition_bboxes = []
        for slider in sliders:
            windows = slider.get_partition(idx + n_examples_skip)
            partition_bboxes.extend(windows)
        
        image_with_bboxes = draw_boxes(image, partition_bboxes)
        ax.set_title("Partition id {}".format(idx + n_examples_skip))
        ax.imshow(image_with_bboxes)

    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.1, hspace=0.2)
    plt.show()
    

visualize_partitioned_search_area(25, 20, 5)

Try classifier on provided test images

In [37]:
# A heatmap pixel must be covered by more than this many positive windows
# to survive thresholding
DETECTION_THRESHOLD = 6

def test_image_detection(image):
    """Run the full single-image detection pipeline on `image`.

    Returns a tuple of the pipeline's intermediate results for plotting:
    (image with all windows, image with positive windows, raw heatmap,
     thresholded heatmap, label output, image with final bounding boxes).
    """
    draw_image = np.copy(image)

    # Prepare the sliding windows for every configured search range
    windows = []
    for search_range in WINDOW_SEARCH_RANGES:
        slider = WindowSlider( 
            x_start_stop=search_range['x_start_stop'], 
            y_start_stop=search_range['y_start_stop'], 
            xy_window=search_range['window_size'],
            xy_overlap=search_range['xy_overlap'])
        windows.extend(slider.get_all_windows())

    # Classify every window in parallel, using half the available cores
    window_search = ImageWindowSearch(image, classifier)
    with multiprocessing.Pool(int(multiprocessing.cpu_count()/2)) as process_pool:
        hot_windows = window_search.search_windows(windows, process_pool)                       


    all_windows_img = draw_boxes(draw_image, windows)
    positive_labeled_windows = draw_boxes(draw_image, hot_windows)

    # Accumulate the positive windows into a heatmap (kept before/after threshold)
    heatmap = WindowSlider.windows_to_heatmap(image.shape[0:2], hot_windows)
    heatmap_orig = np.copy(heatmap)
    # Threshold away weakly supported pixels
    heatmap[heatmap <= DETECTION_THRESHOLD] = 0
    # Label separate detections (connected components)
    labels = label(heatmap)
    # Calculate bounding boxes for labels
    detected_bboxes = PartitioningWindowSlider.get_labeled_bboxes(labels)
    # Draw bounding boxes on image
    if detected_bboxes is not None:
        img_with_bboxes = draw_boxes(image, detected_bboxes)
    else: 
        img_with_bboxes = image

    return (all_windows_img, positive_labeled_windows, heatmap_orig, heatmap, labels, img_with_bboxes)


# files = glob('test_images/problem_image.png')
# image = mpimg.imread(files[0])
# %prun test_image_detection(image)
# SETTING_TEST_CLASSIFIER_ON_TEST_IMAGES = False

# Visualize every stage of the detection pipeline on the provided test images
if SETTING_TEST_CLASSIFIER_ON_TEST_IMAGES:
    files = glob('test_images/test*.jpg')
    for file in files:

        image = mpimg.imread(file)
        (all_windows_img, positive_labeled_windows, heatmap_orig, heatmap, labels, img_with_bboxes) = test_image_detection(image)

        # Plot: one figure per image, six panels covering the pipeline stages
        f, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(2, 3, figsize=(24, 10))
        f.tight_layout()
        ax1.set_title('Scanned Windows', fontsize=30)
        ax1.imshow(all_windows_img)
        ax2.set_title('Positive Scans', fontsize=30)
        ax2.imshow(positive_labeled_windows)
        ax3.set_title('Heatmap before threshold', fontsize=30)
        ax3.imshow(heatmap_orig, cmap='gray')
        ax4.set_title('Heatmap after threshold', fontsize=30)
        ax4.imshow(heatmap, cmap='hot')
        ax5.set_title('Labels', fontsize=30)
        ax5.imshow(labels[0], cmap='gray')
        ax6.set_title('Final Detections', fontsize=30)
        ax6.imshow(img_with_bboxes)
        plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.1, hspace=0.2)
        plt.show()

Settings for Video Processing

In [21]:
# Input / output paths for the short test clip
TEST_VIDEO_INPUT_PATH = 'test_video.mp4'
TEST_VIDEO_OUTPUT_PATH = 'output_images/test_video_output.mov'

# Input / output paths for the full project video, plus debug streams
PROJECT_VIDEO_INPUT_PATH = 'project_video.mp4'
PROJECT_VIDEO_OUTPUT_PATH = 'output_images/project_video_output.mov'
PROJECT_HEATMAP_OUTPUT_PATH = 'output_images/project_video_heatmap.mov'
PROJECT_WINDOWS_OUTPUT_PATH = 'output_images/project_video_windows.mov'

# Detection / tracking tuning constants, consumed by the tracker and video
# pipeline below. NOTE(review): exact semantics are defined by code past this
# chunk — confirm against VehicleTracker and the video-processing cells.
SECONDS_TO_FULL_SCAN = 3
OVERLAP_THRESHOLD = 0.05
MIN_DETECTIONS_FOR_QUALITY_TRACKER = 5
MIN_DETECTIONS_FOR_TRACKER = 4

# Parameters of the tracker's local search (presumably overlap of nearby probe
# windows, number of probe steps, and per-step scale factor — verify below)
TRACKER_OVERLAP_HORIZ = 0.95
TRACKER_OVERLAP_VERT = 0.98
TRACKER_STEPS_PER_DIRECTION = 3
TRACKER_SCALEDOWN = 0.98
In [22]:
%%HTML
<a name="loc_tracker"></a>

Define the tracker

In [23]:
class VehicleTracker():
    """Tracks a single vehicle across video frames.

    Combines an OpenCV correlation tracker (KCF by default) with the project's
    classifier: each internal-tracker hit is validated by the classifier, and
    when the internal tracker fails (or its region is rejected) a local
    sliding-window search around the last known position tries to reacquire
    the vehicle.
    """

    def __init__(self, 
                 classifier, 
                 stable_id,
                 initial_frame, 
                 initial_rect, 
                 process_pool,
                 detection_threshold=0,
                 loss_threshold=0,
                 tracker_type="KCF"
                ):
        # History of accepted detection rects, oldest first
        self.detect_rects_history = []
        self.classifier = classifier
        self.stable_id = stable_id              # caller-assigned stable vehicle id
        self.tracker_type = tracker_type        # OpenCV tracker algorithm name
        self.detect_rects_history.append(initial_rect)
        self.has_been_updated = False
        self.detections = 1                     # the initial rect counts as one detection
        self.losses = 0                         # failed nearby searches so far
        self.loss_threshold = loss_threshold            # losses beyond this => lost
        self.detection_threshold = detection_threshold  # detections needed to be trusted
        self.process_pool = process_pool
        self.reinit_internal_tracker(initial_frame, initial_rect)

    def reinit_internal_tracker(self, frame, rect):
        """(Re)create the OpenCV tracker, seeded with `rect` in `frame`."""
        self.internal_tracker = cv2.Tracker_create(self.tracker_type)
        self.internal_tracker.init(frame,
                                   VehicleTracker.rect_to_tracker_bbox(rect))

    def update(self, frame):
        """Advance the tracker by one frame.

        Returns the new detection Rect, or None if the vehicle is lost.
        """
        detection = None
        # First try the internal OpenCV tracker
        ok, bbox = self.internal_tracker.update(frame)
        if ok:
            # Tracker succeeded — validate the proposed region with the classifier
            detection_rect = VehicleTracker.tracker_bbox_to_rect(bbox)
            is_vehicle = self.classify_detection(frame, detection_rect)
            if is_vehicle:
                detection = detection_rect

        # If the internal tracker failed, or the classifier rejected its region,
        # fall back to a local window search around the last detection.
        if detection is None:
            detection = self.perform_nearby_search(frame)
            if detection is None:  # BUGFIX: was `== None`; identity check is the idiom
                # Nearby search failed too — give up for this frame
                return None
            # Reacquired by proximity search: re-seed the internal tracker
            self.reinit_internal_tracker(frame, detection)

        # Record the accepted detection
        self.detect_rects_history.append(detection)
        self.detections += 1
        self.has_been_updated = True
        return detection

    def perform_nearby_search(self, frame):
        """Search for the vehicle around the previous detection.

        Returns the best candidate Rect; while no candidate is found but the
        loss budget is not exhausted, keeps returning the previous rect.
        Returns None once the tracker is considered lost.
        """
        # Build search windows around the previous detection
        last_detection_rect = self.detect_rects_history[-1]
        candidate_rects = VehicleTracker.get_candidates_for_nearby_search(last_detection_rect)
        candidate_search_windows = []
        for rect in candidate_rects:
            classify_bbox = ((rect.p1.x, rect.p1.y), (rect.p2.x, rect.p2.y))
            candidate_search_windows.append(classify_bbox)

        detections = detect_cars_in_frame(frame,
                                          candidate_search_windows,
                                          self.classifier,
                                          self.process_pool
                                         )

        # Keep classifier-approved, non-degenerate rects
        valid_rects = []
        for detection in detections[0]:
            bbox = detection
            rect = Rect(
                Point(bbox[0][0], bbox[0][1]),
                Point(bbox[1][0], bbox[1][1]))
            if rect.get_height() == 0 or rect.get_width() == 0:
                continue
            valid_rects.append(rect)

        # No valid match: count a loss; keep reporting the previous rect
        # until the loss budget is exhausted.
        if len(valid_rects) == 0:
            self.losses += 1
            if self.get_is_lost_or_rejected():
                return None
            else:
                return last_detection_rect

        # Pick the candidate whose area best matches a blend of the last
        # detection's area and the mean candidate area.
        last_detection_area = self.detect_rects_history[-1].calculate_area()
        total_rect_area = sum(rect.calculate_area() for rect in valid_rects)
        avg_rect_area = total_rect_area / len(valid_rects)
        target_rect_area = (last_detection_area + avg_rect_area)/2

        final_rect = min(valid_rects, key = lambda rect: (rect.calculate_area() - target_rect_area)**2)
        return final_rect

    def classify_detection(self, frame, detection_rect):
        """Run this tracker's classifier on the crop of `frame` covered by `detection_rect`."""
        detection_img = frame[
                detection_rect.get_top() : detection_rect.get_bottom(),
                detection_rect.get_left() : detection_rect.get_right()
            ]
        # BUGFIX: previously referenced the module-level global `classifier`
        # instead of the classifier injected into this tracker.
        is_vehicle = self.classifier.is_vehicle(detection_img)
        return is_vehicle

    @staticmethod
    def get_candidates_for_nearby_search(rect):
        """Build candidate rects around `rect`: 8-direction shifts plus a
        scaled-up and scaled-down variant, at increasing step sizes."""
        candidate_rects = []
        for step in range(1, TRACKER_STEPS_PER_DIRECTION):
            dx = rect.get_width()*(1-TRACKER_OVERLAP_HORIZ) * step
            # BUGFIX: vertical step was derived from the width; use the height
            dy = rect.get_height()*(1-TRACKER_OVERLAP_VERT) * step
            original = rect
            left = rect.shift(-dx, 0)
            right = rect.shift(dx, 0)
            above = rect.shift(0, -dy)
            below = rect.shift(0, dy)
            left_above = rect.shift(-dx, -dy)
            right_above = rect.shift(dx, -dy)
            left_below = rect.shift(-dx, dy)
            right_below = rect.shift(dx, dy)
            # BUGFIX: both corners were previously derived from p2, producing a
            # tiny rect around p2 rather than a rescaled version of `rect`.
            scale_up = Rect(rect.p1.shift(-dx, -dy), rect.p2.shift(dx, dy))
            candidate_rects.extend([original, left, right, above, below,
                                   left_above, right_above, left_below, right_below,
                                   scale_up])
            try:
                # Shrinking may produce a rect of negative dimension, which raises
                scale_down = Rect(rect.p1.shift(dx, dy), rect.p2.shift(-dx, -dy))
                candidate_rects.append(scale_down)
            except Exception:
                pass

        return candidate_rects

    @staticmethod
    def rect_to_tracker_bbox(rect):
        """Convert a Rect to the (x, y, w, h) tuple OpenCV trackers expect."""
        tracker_bbox = (rect.get_left(),
                rect.get_top(),
                rect.get_width(),
                rect.get_height())
        return tracker_bbox

    @staticmethod
    def tracker_bbox_to_rect(tracker_bbox):
        """Convert an OpenCV (x, y, w, h) bbox to a Rect, clamping negative values to 0."""
        tracker_bbox = list(tracker_bbox)  # tuples are immutable — copy for writeability
        for i in range(0, 4):
            if tracker_bbox[i] < 0:
                tracker_bbox[i] = 0
        return Rect(
            Point(tracker_bbox[0], tracker_bbox[1]),
            Point(tracker_bbox[0]+tracker_bbox[2], tracker_bbox[1]+tracker_bbox[3])
        )

    def get_stable_id(self):
        """Caller-assigned stable identifier for this vehicle."""
        return self.stable_id

    def get_is_lost_or_rejected(self):
        """True once the number of losses exceeds the loss budget."""
        return self.losses > self.loss_threshold

    def get_is_past_detection_threshold(self):
        """True once enough detections accumulated to trust this tracker."""
        return self.detections > self.detection_threshold

    def get_latest_detection(self):
        """Most recently accepted detection Rect."""
        return self.detect_rects_history[-1]

    def get_detected_rects_history(self):
        """All accepted detection Rects, oldest first."""
        return self.detect_rects_history

    def get_has_been_updated(self):
        """True after the first successful update() call."""
        return self.has_been_updated

Show how tracker will search around a bounding box

In [24]:
def visualize_tracker_search_area():
    """Illustrate the candidate windows VehicleTracker scans around a match."""
    # Use the first test image as a backdrop
    image = mpimg.imread(glob('test_images/test*.jpg')[0])

    # A few synthetic "detections" of increasing size
    matching_rects = [
        Rect(Point(100, 100), Point(164, 164)),
        Rect(Point(200, 200), Point(328, 328)),
        Rect(Point(400, 400), Point(564, 564)),
    ]

    # One panel per synthetic match, candidates drawn over the backdrop
    fig, axes_arr = plt.subplots(len(matching_rects), figsize=(24, 30))
    fig.tight_layout()
    for matching_rect, ax in zip(matching_rects, axes_arr):
        candidates = VehicleTracker.get_candidates_for_nearby_search(matching_rect)
        annotated = draw_rects(image, candidates, thick=2)
        # Highlight the original match in magenta
        annotated = draw_rects(annotated, [matching_rect], (255, 0, 255), thick=2)
        ax.set_title("Search range for match at {}".format(matching_rect))
        ax.imshow(annotated)

    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.1, hspace=0.2)
    plt.show()


visualize_tracker_search_area()
In [25]:
%%HTML
<a name="loc_apply_pipeline"></a>
<h1>Apply pipeline to video</h1>

Apply pipeline to video

In [38]:
class PerformanceTracker:
    """Collects per-frame processing durations and reports summary statistics."""

    def __init__(self):
        # Wall-clock duration (seconds) of each processed frame, in order
        self.frame_processing_times = []

    def append_processing_time(self, time_span):
        """Record the duration of one processed frame."""
        self.frame_processing_times.append(time_span)

    def print_stats(self):
        """Print total / average / max processing time; no-op when nothing recorded."""
        times = self.frame_processing_times
        if not times:
            return
        total = sum(times)
        print('Processing times: total - {}   average - {}   max - {} '.format(
              total,
              total / len(times),
              max(times)
             ))
        

def detect_cars_in_frame(image, 
                         windows, 
                         classifier, 
                         process_pool):
    """Classify the given windows in `image` and merge positives into labeled
    bounding boxes via a thresholded heatmap.

    Returns a tuple (detected_bboxes, heatmap).
    """
    # Run the classifier over every candidate window (using the process pool)
    searcher = ImageWindowSearch(image, classifier)
    hot_windows = searcher.search_windows(windows, process_pool)

    # Accumulate positive windows into a heatmap, suppress weak responses
    heatmap = WindowSlider.windows_to_heatmap(image.shape[0:2], hot_windows)
    heatmap[heatmap <= DETECTION_THRESHOLD] = 0

    # Connected-component labeling -> one bounding box per detected blob
    labels = label(heatmap)
    detected_bboxes = PartitioningWindowSlider.get_labeled_bboxes(labels)

    return detected_bboxes, heatmap

# class VideoProcessorConfig:
#     # TODO: Static state
#     # TODO: Frame boundaries
#     # TODO: Thresholds
#     pass

class FrameProcessorConfig:
    """Configuration shared by FrameProcessor across all frames of one video."""

    def __init__(self, 
                 classifier,
                 frame_size, 
                 video_fps, 
                 seconds_to_full_scan, 
                 window_search_ranges,
                 output_heatmap_frame, 
                 output_windows_frame
                ):
        self.classifier = classifier
        self.frame_size = frame_size
        self.output_heatmap_frame = output_heatmap_frame  # emit a heatmap debug frame?
        self.output_windows_frame = output_windows_frame  # emit a scan-window debug frame?
        # A full sweep of all scan windows is spread across this many frames
        self.n_partitions = int(video_fps*seconds_to_full_scan)
        self.slider_group = PartitioningWindowSliderGroup(window_search_ranges, self.n_partitions)
        


class State:
    """Mutable per-video processing state threaded through FrameProcessor.process_frame."""

    def __init__(self):
        self.frame_idx = 0                     # incremented once per frame read
        self.previous_batch_detections = None  # NOTE(review): not visibly read in this file
        self.trackers = []                     # active VehicleTracker instances
        self.historical_lost_trackers = []     # trackers retired once lost/rejected
        self.vehicles_counter = 0              # monotonically increasing stable-id source
        # BUGFIX: FrameProcessor.process_frame sets this flag, but it was never
        # initialized here, so reading it before the first quality-tracker loss
        # would raise AttributeError.
        self.too_many_lost_trackers = False
        
        


        
class FrameProcessor:
    """Per-frame vehicle detection + tracking pipeline.

    For each frame: scan one partition of the sliding windows, start trackers
    for new detections that do not overlap existing trackers, then update
    every tracker and report the boxes of trackers that passed their
    detection threshold.
    """

    def __init__(self, config):
        self.config = config
        # Half the cores: leaves headroom for video decode / OpenCV threads
        self.process_pool = multiprocessing.Pool(int(multiprocessing.cpu_count()/2))

    def process_frame(self, 
                      frame,
                      state):
        """Process one frame.

        Returns (out_frame, heatmap_frame, windows_frame); the debug frames are
        None unless enabled in the config. Mutates `state` (trackers, counters).
        """
        # Scan only this frame's partition of the full window set
        partition_idx = state.frame_idx % self.config.n_partitions
        scan_windows = self.config.slider_group.get_windows_for_partition(partition_idx)

        frame_detections, heatmap_frame = detect_cars_in_frame(
            frame,
            scan_windows,
            self.config.classifier, 
            self.process_pool
        )

        print("Frame {} detections: {}".format(state.frame_idx, len(frame_detections)))

        for detection in frame_detections:
            rect = Rect(
                Point(detection[0][0], detection[0][1]),
                Point(detection[1][0], detection[1][1]))
            if rect.get_height() == 0 or rect.get_width() == 0:
                continue  # degenerate box — skip
            current_detection_area = rect.calculate_area()

            # Does this detection overlap an existing live tracker?
            overlapping_tracker_found = False
            for tracker in state.trackers:
                if tracker.get_is_lost_or_rejected():
                    continue
                latest_detection = tracker.get_latest_detection()
                overlap = latest_detection.calculate_overlap(rect)
                if overlap is None:  # BUGFIX: was `== None`
                    continue
                overlap_area = overlap.calculate_area()
                # Significant overlap relative to either the new detection or
                # the tracker's last box => same vehicle, no new tracker needed.
                # TODO: Consider switching the tracker to the larger area
                if (overlap_area/current_detection_area >= OVERLAP_THRESHOLD or
                        overlap_area/latest_detection.calculate_area() >= OVERLAP_THRESHOLD):
                    overlapping_tracker_found = True
                    break
            if overlapping_tracker_found:
                continue

            # New vehicle: assign a stable id and start tracking it
            state.vehicles_counter += 1
            tracker = VehicleTracker(self.config.classifier, 
                                     state.vehicles_counter, 
                                     frame, 
                                     rect, 
                                     self.process_pool)
            state.trackers.append(tracker)

        # The trackers now provide this frame's detections (each tracker
        # validates its own region against the classifier internally).
        frame_detections = []
        for tracker in state.trackers:
            rect = tracker.update(frame)
            if rect is not None:
                bbox = ((rect.p1.x, rect.p1.y), (rect.p2.x, rect.p2.y))
                # Only report trackers with enough accumulated detections;
                # younger trackers are still unconfirmed.
                if tracker.get_is_past_detection_threshold():
                    frame_detections.append(bbox)

        # Retire lost trackers to the historical list
        lost_trackers = [tracker for tracker in state.trackers if tracker.get_is_lost_or_rejected()]
        state.historical_lost_trackers.extend(lost_trackers)
        state.trackers = [tracker for tracker in state.trackers if not tracker.get_is_lost_or_rejected()]

        # TODO: draw tracker IDs on vehicles with several recent detections
        # TODO: reacquire high-quality lost trackers via nearby classifier search.
        # Until then, just flag so a full search can react on the next frame.
        quality_lost_trackers = [tracker for tracker in lost_trackers
                                 if len(tracker.get_detected_rects_history()) >= MIN_DETECTIONS_FOR_QUALITY_TRACKER]
        if len(quality_lost_trackers) > 0:
            state.too_many_lost_trackers = True

        out_frame = draw_boxes(frame, frame_detections)

        if self.config.output_heatmap_frame and heatmap_frame is not None:
            # BUGFIX: scaling used to happen after the uint8 cast
            # (`np.uint8(h)*10`), which wraps around at 256 and corrupts the
            # visualization. Scale first, then clip and cast; stack to 3 channels.
            heatmap_frame = np.uint8(np.clip(heatmap_frame * 10, 0, 255))
            heatmap_frame = np.dstack((heatmap_frame, heatmap_frame, heatmap_frame))
        else:
            heatmap_frame = None
        if self.config.output_windows_frame:
            windows_frame = draw_boxes(frame, scan_windows)
        else:
            windows_frame = None

        # TODO: (2d) velocity/direction estimation; 3d later via horizon transform
        return out_frame, heatmap_frame, windows_frame



    
def _open_video_writer(output_path, video_fps, frame_size):
    """Create an H.264 ('avc1') color VideoWriter, overwriting any existing file."""
    if os.path.exists(output_path):
        os.remove(output_path)
    fourcc = cv2.VideoWriter_fourcc(*'avc1')
    return cv2.VideoWriter(output_path,
                           fourcc,
                           fps=video_fps,
                           frameSize=frame_size,
                           isColor=True)


def process_video(video_input_path,
                  video_output_path, 
                  heatmap_output_path=None,  
                  windows_output_path=None,
                  frame_start=0,
                  frame_end=None):
    """Run the detection/tracking pipeline over a video file.

    Writes the annotated video to `video_output_path`; optionally writes
    heatmap and scan-window debug videos. Frames outside
    [frame_start, frame_end] are read but neither processed nor written.
    Relies on the module-level `classifier` and WINDOW_SEARCH_RANGES.
    """
    # Probe the input for fps / frame size so the outputs match
    video = cv2.VideoCapture(video_input_path)
    video_fps = video.get(cv2.CAP_PROP_FPS)
    frame_size = (int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))

    # Prepare the output writers (existing files are overwritten).
    # The duplicated setup is factored into _open_video_writer.
    video_out = _open_video_writer(video_output_path, video_fps, frame_size)
    heatmap_out = None
    if heatmap_output_path is not None:
        heatmap_out = _open_video_writer(heatmap_output_path, video_fps, frame_size)
    windows_out = None
    if windows_output_path is not None:
        windows_out = _open_video_writer(windows_output_path, video_fps, frame_size)

    frame_processor_config = FrameProcessorConfig(
        classifier,
        frame_size, 
        video_fps,
        seconds_to_full_scan = SECONDS_TO_FULL_SCAN,
        window_search_ranges = WINDOW_SEARCH_RANGES, 
        # BUGFIX: was `!= None`; identity comparison is the correct idiom
        output_heatmap_frame = (heatmap_out is not None),
        output_windows_frame = (windows_out is not None)
    )

    frame_processor = FrameProcessor(frame_processor_config)
    performance_tracker = PerformanceTracker()
    state = State()

    while video.isOpened():
        time_op_start = time.time()

        # Read a frame; stop at end of stream
        ret, frame = video.read()
        if not ret:
            break
        state.frame_idx += 1

        # Skip frames outside the requested range
        if state.frame_idx < frame_start or (frame_end is not None and state.frame_idx > frame_end):
            continue

        # Process the frame
        out_frame, heatmap_frame, windows_frame = frame_processor.process_frame(frame, state)

        # Performance bookkeeping.
        # NOTE: profiling showed the vast majority (~86%) of the time is lost
        # to thread-lock acquisition around the multiprocessing pool:
        #   244512 function calls in 201.634 seconds
        #   1757  174.521  0.099  174.521  0.099 {method 'acquire' of '_thread.lock' objects}
        performance_tracker.append_processing_time(time.time() - time_op_start)
        if state.frame_idx % 10 == 0:
            print("Processed frame ", state.frame_idx)

        # Emit the output frames
        video_out.write(out_frame)
        if heatmap_frame is not None:
            heatmap_out.write(heatmap_frame)
        if windows_frame is not None:
            windows_out.write(windows_frame)

    # Print statistics
    performance_tracker.print_stats()

    print("Releasing everything")
    video.release()
    video_out.release()
    if heatmap_out:
        heatmap_out.release()
    if windows_out:
        windows_out.release()

def process_full_project_video():
    """Process the entire project video (annotated output only)."""
    # Debug streams disabled; pass the PROJECT_*_OUTPUT_PATH constants to enable them.
    process_video(PROJECT_VIDEO_INPUT_PATH,
                  PROJECT_VIDEO_OUTPUT_PATH,
                  heatmap_output_path=None,
                  windows_output_path=None,
                  frame_start=0,
                  frame_end=None)

def process_test_video():
    """Process the short test clip (annotated output only)."""
    process_video(TEST_VIDEO_INPUT_PATH, TEST_VIDEO_OUTPUT_PATH)
    
def process_part_project_video():
    """Process frames 200-400 of the project video, for quick iteration."""
    # Debug streams disabled; pass the PROJECT_*_OUTPUT_PATH constants to enable them.
    process_video(PROJECT_VIDEO_INPUT_PATH,
                  PROJECT_VIDEO_OUTPUT_PATH,
                  heatmap_output_path=None,
                  windows_output_path=None,
                  frame_start=200,
                  frame_end=400)

# Kick off the full-video run. Uncomment the line below (and comment the call)
# to profile a partial run instead.
# %prun process_part_project_video()
process_full_project_video()
Frame 1 detections: 0
Frame 2 detections: 1
Frame 3 detections: 0
Frame 4 detections: 0
Frame 5 detections: 0
Frame 6 detections: 0
Frame 7 detections: 0
Frame 8 detections: 0
Frame 9 detections: 0
Frame 10 detections: 0
Processed frame  10
Frame 11 detections: 0
Frame 12 detections: 0
Frame 13 detections: 0
Frame 14 detections: 0
Frame 15 detections: 0
Frame 16 detections: 0
Frame 17 detections: 0
Frame 18 detections: 0
Frame 19 detections: 0
Frame 20 detections: 0
Processed frame  20
Frame 21 detections: 0
Frame 22 detections: 0
Frame 23 detections: 0
Frame 24 detections: 0
Frame 25 detections: 0
Frame 26 detections: 0
Frame 27 detections: 0
Frame 28 detections: 0
Frame 29 detections: 0
Frame 30 detections: 0
Processed frame  30
Frame 31 detections: 0
Frame 32 detections: 0
Frame 33 detections: 0
Frame 34 detections: 0
Frame 35 detections: 0
Frame 36 detections: 0
Frame 37 detections: 0
Frame 38 detections: 0
Frame 39 detections: 0
Frame 40 detections: 0
Processed frame  40
Frame 41 detections: 0
Frame 42 detections: 0
Frame 43 detections: 0
Frame 44 detections: 0
Frame 45 detections: 0
Frame 46 detections: 0
Frame 47 detections: 0
Frame 48 detections: 0
Frame 49 detections: 0
Frame 50 detections: 0
Processed frame  50
Frame 51 detections: 0
Frame 52 detections: 0
Frame 53 detections: 0
Frame 54 detections: 0
Frame 55 detections: 0
Frame 56 detections: 0
Frame 57 detections: 0
Frame 58 detections: 0
Frame 59 detections: 0
Frame 60 detections: 0
Processed frame  60
Frame 61 detections: 0
Frame 62 detections: 0
Frame 63 detections: 0
Frame 64 detections: 0
Frame 65 detections: 0
Frame 66 detections: 0
Frame 67 detections: 0
Frame 68 detections: 0
Frame 69 detections: 0
Frame 70 detections: 0
Processed frame  70
Frame 71 detections: 0
Frame 72 detections: 0
Frame 73 detections: 0
Frame 74 detections: 0
Frame 75 detections: 0
Frame 76 detections: 0
Frame 77 detections: 0
Frame 78 detections: 0
Frame 79 detections: 0
Frame 80 detections: 0
Processed frame  80
Frame 81 detections: 0
Frame 82 detections: 0
Frame 83 detections: 0
Frame 84 detections: 0
Frame 85 detections: 0
Frame 86 detections: 0
Frame 87 detections: 0
Frame 88 detections: 0
Frame 89 detections: 0
Frame 90 detections: 0
Processed frame  90
Frame 91 detections: 0
Frame 92 detections: 0
Frame 93 detections: 0
Frame 94 detections: 0
Frame 95 detections: 0
Frame 96 detections: 0
Frame 97 detections: 0
Frame 98 detections: 0
Frame 99 detections: 0
Frame 100 detections: 0
Processed frame  100
Frame 101 detections: 0
Frame 102 detections: 0
Frame 103 detections: 0
Frame 104 detections: 0
Frame 105 detections: 0
Frame 106 detections: 0
Frame 107 detections: 0
Frame 108 detections: 0
Frame 109 detections: 0
Frame 110 detections: 0
Processed frame  110
Frame 111 detections: 0
Frame 112 detections: 0
Frame 113 detections: 0
Frame 114 detections: 0
Frame 115 detections: 0
Frame 116 detections: 0
Frame 117 detections: 0
Frame 118 detections: 0
Frame 119 detections: 0
Frame 120 detections: 0
Processed frame  120
Frame 121 detections: 0
Frame 122 detections: 0
Frame 123 detections: 0
Frame 124 detections: 0
Frame 125 detections: 0
Frame 126 detections: 0
Frame 127 detections: 0
Frame 128 detections: 0
Frame 129 detections: 0
Frame 130 detections: 0
Processed frame  130
Frame 131 detections: 0
Frame 132 detections: 0
Frame 133 detections: 0
Frame 134 detections: 0
Frame 135 detections: 0
Frame 136 detections: 0
Frame 137 detections: 0
Frame 138 detections: 0
Frame 139 detections: 0
Frame 140 detections: 0
Processed frame  140
Frame 141 detections: 0
Frame 142 detections: 0
Frame 143 detections: 0
Frame 144 detections: 0
Frame 145 detections: 0
Frame 146 detections: 0
Frame 147 detections: 0
Frame 148 detections: 0
Frame 149 detections: 0
Frame 150 detections: 0
Processed frame  150
Frame 151 detections: 0
Frame 152 detections: 0
Frame 153 detections: 0
Frame 154 detections: 0
Frame 155 detections: 0
Frame 156 detections: 0
Frame 157 detections: 0
Frame 158 detections: 0
Frame 159 detections: 0
Frame 160 detections: 0
Processed frame  160
Frame 161 detections: 0
Frame 162 detections: 0
Frame 163 detections: 0
Frame 164 detections: 0
Frame 165 detections: 0
Frame 166 detections: 0
Frame 167 detections: 0
Frame 168 detections: 0
Frame 169 detections: 0
Frame 170 detections: 0
Processed frame  170
Frame 171 detections: 0
Frame 172 detections: 0
Frame 173 detections: 0
Frame 174 detections: 0
Frame 175 detections: 0
Frame 176 detections: 0
Frame 177 detections: 0
Frame 178 detections: 0
Frame 179 detections: 0
Frame 180 detections: 0
Processed frame  180
Frame 181 detections: 0
Frame 182 detections: 0
Frame 183 detections: 0
Frame 184 detections: 0
Frame 185 detections: 0
Frame 186 detections: 0
Frame 187 detections: 0
Frame 188 detections: 0
Frame 189 detections: 0
Frame 190 detections: 0
Processed frame  190
Frame 191 detections: 0
Frame 192 detections: 0
Frame 193 detections: 0
Frame 194 detections: 0
Frame 195 detections: 0
Frame 196 detections: 0
Frame 197 detections: 0
Frame 198 detections: 0
Frame 199 detections: 0
Frame 200 detections: 0
Processed frame  200
Frame 201 detections: 0
Frame 202 detections: 0
Frame 203 detections: 0
Frame 204 detections: 0
Frame 205 detections: 0
Frame 206 detections: 0
Frame 207 detections: 0
Frame 208 detections: 0
Frame 209 detections: 0
Frame 210 detections: 0
Processed frame  210
Frame 211 detections: 0
Frame 212 detections: 0
Frame 213 detections: 1
Frame 214 detections: 0
Frame 215 detections: 0
Frame 216 detections: 1
Frame 217 detections: 1
Frame 218 detections: 0
Frame 219 detections: 0
Frame 220 detections: 0
Processed frame  220
Frame 221 detections: 0
Frame 222 detections: 0
Frame 223 detections: 0
Frame 224 detections: 0
Frame 225 detections: 0
Frame 226 detections: 0
Frame 227 detections: 0
Frame 228 detections: 0
Frame 229 detections: 0
Frame 230 detections: 0
Processed frame  230
Frame 231 detections: 0
Frame 232 detections: 0
Frame 233 detections: 0
Frame 234 detections: 0
Frame 235 detections: 0
Frame 236 detections: 0
Frame 237 detections: 0
Frame 238 detections: 0
Frame 239 detections: 0
Frame 240 detections: 0
Processed frame  240
Frame 241 detections: 0
Frame 242 detections: 0
Frame 243 detections: 0
Frame 244 detections: 0
Frame 245 detections: 0
Frame 246 detections: 0
Frame 247 detections: 0
Frame 248 detections: 0
Frame 249 detections: 0
Frame 250 detections: 0
Processed frame  250
Frame 251 detections: 0
Frame 252 detections: 0
Frame 253 detections: 0
Frame 254 detections: 0
Frame 255 detections: 0
Frame 256 detections: 0
Frame 257 detections: 0
Frame 258 detections: 0
Frame 259 detections: 0
Frame 260 detections: 0
Processed frame  260
Frame 261 detections: 0
Frame 262 detections: 0
Frame 263 detections: 0
Frame 264 detections: 0
Frame 265 detections: 0
Frame 266 detections: 0
Frame 267 detections: 0
Frame 268 detections: 0
Frame 269 detections: 0
Frame 270 detections: 0
Processed frame  270
Frame 271 detections: 0
Frame 272 detections: 0
Frame 273 detections: 0
Frame 274 detections: 0
Frame 275 detections: 0
Frame 276 detections: 0
Frame 277 detections: 0
Frame 278 detections: 0
Frame 279 detections: 0
Frame 280 detections: 0
Processed frame  280
Frame 281 detections: 0
Frame 282 detections: 0
Frame 283 detections: 1
Frame 284 detections: 0
Frame 285 detections: 0
Frame 286 detections: 1
Frame 287 detections: 1
Frame 288 detections: 1
Frame 289 detections: 1
Frame 290 detections: 1
Processed frame  290
Frame 291 detections: 0
Frame 292 detections: 0
Frame 293 detections: 0
Frame 294 detections: 0
Frame 295 detections: 0
Frame 296 detections: 0
Frame 297 detections: 0
Frame 298 detections: 0
Frame 299 detections: 0
Frame 300 detections: 0
Processed frame  300
Frame 301 detections: 0
Frame 302 detections: 0
Frame 303 detections: 0
Frame 304 detections: 0
Frame 305 detections: 0
Frame 306 detections: 0
Frame 307 detections: 0
Frame 308 detections: 0
Frame 309 detections: 0
Frame 310 detections: 0
Processed frame  310
Frame 311 detections: 0
Frame 312 detections: 0
Frame 313 detections: 0
Frame 314 detections: 0
Frame 315 detections: 0
Frame 316 detections: 0
Frame 317 detections: 0
Frame 318 detections: 0
Frame 319 detections: 0
Frame 320 detections: 0
Processed frame  320
Frame 321 detections: 0
Frame 322 detections: 0
Frame 323 detections: 0
Frame 324 detections: 0
Frame 325 detections: 0
Frame 326 detections: 0
Frame 327 detections: 0
Frame 328 detections: 0
Frame 329 detections: 0
Frame 330 detections: 0
Processed frame  330
Frame 331 detections: 0
Frame 332 detections: 0
Frame 333 detections: 0
Frame 334 detections: 0
Frame 335 detections: 0
Frame 336 detections: 0
Frame 337 detections: 0
Frame 338 detections: 0
Frame 339 detections: 0
Frame 340 detections: 0
Processed frame  340
Frame 341 detections: 0
Frame 342 detections: 0
Frame 343 detections: 0
Frame 344 detections: 0
Frame 345 detections: 0
Frame 346 detections: 0
Frame 347 detections: 0
Frame 348 detections: 0
Frame 349 detections: 0
Frame 350 detections: 0
Processed frame  350
Frame 351 detections: 0
Frame 352 detections: 0
Frame 353 detections: 0
Frame 354 detections: 0
Frame 355 detections: 0
Frame 356 detections: 0
Frame 357 detections: 0
Frame 358 detections: 1
Frame 359 detections: 1
Frame 360 detections: 1
Processed frame  360
Frame 361 detections: 0
Frame 362 detections: 0
Frame 363 detections: 0
Frame 364 detections: 1
Frame 365 detections: 1
Frame 366 detections: 0
Frame 367 detections: 0
Frame 368 detections: 0
Frame 369 detections: 0
Frame 370 detections: 0
Processed frame  370
Frame 371 detections: 0
Frame 372 detections: 0
Frame 373 detections: 0
Frame 374 detections: 0
Frame 375 detections: 1
Frame 376 detections: 0
Frame 377 detections: 0
Frame 378 detections: 0
Frame 379 detections: 0
Frame 380 detections: 0
Processed frame  380
Frame 381 detections: 0
Frame 382 detections: 0
Frame 383 detections: 0
Frame 384 detections: 0
Frame 385 detections: 0
Frame 386 detections: 0
Frame 387 detections: 0
Frame 388 detections: 0
Frame 389 detections: 0
Frame 390 detections: 0
Processed frame  390
Frame 391 detections: 0
Frame 392 detections: 0
Frame 393 detections: 0
Frame 394 detections: 0
Frame 395 detections: 0
Frame 396 detections: 0
Frame 397 detections: 0
Frame 398 detections: 0
Frame 399 detections: 0
Frame 400 detections: 0
Processed frame  400
Frame 401 detections: 0
Frame 402 detections: 0
Frame 403 detections: 0
Frame 404 detections: 0
Frame 405 detections: 0
Frame 406 detections: 0
Frame 407 detections: 0
Frame 408 detections: 0
Frame 409 detections: 0
Frame 410 detections: 0
Processed frame  410
Frame 411 detections: 0
Frame 412 detections: 0
Frame 413 detections: 0
Frame 414 detections: 0
Frame 415 detections: 0
Frame 416 detections: 0
Frame 417 detections: 0
Frame 418 detections: 0
Frame 419 detections: 0
Frame 420 detections: 0
Processed frame  420
Frame 421 detections: 0
Frame 422 detections: 0
Frame 423 detections: 0
Frame 424 detections: 0
Frame 425 detections: 0
Frame 426 detections: 0
Frame 427 detections: 0
Frame 428 detections: 0
Frame 429 detections: 1
Frame 430 detections: 0
Processed frame  430
Frame 431 detections: 0
Frame 432 detections: 0
Frame 433 detections: 0
Frame 434 detections: 0
Frame 435 detections: 0
Frame 436 detections: 0
Frame 437 detections: 0
Frame 438 detections: 0
Frame 439 detections: 0
Frame 440 detections: 0
Processed frame  440
Frame 441 detections: 0
Frame 442 detections: 0
Frame 443 detections: 0
Frame 444 detections: 0
Frame 445 detections: 0
Frame 446 detections: 0
Frame 447 detections: 0
Frame 448 detections: 0
Frame 449 detections: 0
Frame 450 detections: 0
Processed frame  450
Frame 451 detections: 0
Frame 452 detections: 0
Frame 453 detections: 0
Frame 454 detections: 0
Frame 455 detections: 0
Frame 456 detections: 0
Frame 457 detections: 0
Frame 458 detections: 0
Frame 459 detections: 0
Frame 460 detections: 0
Processed frame  460
Frame 461 detections: 0
Frame 462 detections: 0
Frame 463 detections: 0
Frame 464 detections: 0
Frame 465 detections: 0
Frame 466 detections: 0
Frame 467 detections: 0
Frame 468 detections: 0
Frame 469 detections: 0
Frame 470 detections: 0
Processed frame  470
Frame 471 detections: 0
Frame 472 detections: 0
Frame 473 detections: 0
Frame 474 detections: 0
Frame 475 detections: 0
Frame 476 detections: 0
Frame 477 detections: 0
Frame 478 detections: 0
Frame 479 detections: 0
Frame 480 detections: 0
Processed frame  480
Frame 481 detections: 0
Frame 482 detections: 0
Frame 483 detections: 0
Frame 484 detections: 0
Frame 485 detections: 0
Frame 486 detections: 0
Frame 487 detections: 0
Frame 488 detections: 0
Frame 489 detections: 0
Frame 490 detections: 0
Processed frame  490
Frame 491 detections: 0
Frame 492 detections: 0
Frame 493 detections: 0
Frame 494 detections: 0
Frame 495 detections: 0
Frame 496 detections: 0
Frame 497 detections: 0
Frame 498 detections: 0
Frame 499 detections: 0
Frame 500 detections: 0
Processed frame  500
Frame 501 detections: 0
Frame 502 detections: 0
Frame 503 detections: 0
Frame 504 detections: 0
Frame 505 detections: 0
Frame 506 detections: 0
Frame 507 detections: 0
Frame 508 detections: 0
Frame 509 detections: 0
Frame 510 detections: 0
Processed frame  510
Frame 511 detections: 0
Frame 512 detections: 0
Frame 513 detections: 0
Frame 514 detections: 0
Frame 515 detections: 0
Frame 516 detections: 0
Frame 517 detections: 0
Frame 518 detections: 0
Frame 519 detections: 0
Frame 520 detections: 0
Processed frame  520
Frame 521 detections: 0
Frame 522 detections: 0
Frame 523 detections: 0
Frame 524 detections: 0
Frame 525 detections: 0
Frame 526 detections: 0
Frame 527 detections: 0
Frame 528 detections: 0
Frame 529 detections: 0
Frame 530 detections: 0
Processed frame  530
Frame 531 detections: 0
Frame 532 detections: 0
Frame 533 detections: 0
Frame 534 detections: 0
Frame 535 detections: 0
Frame 536 detections: 0
Frame 537 detections: 0
Frame 538 detections: 0
Frame 539 detections: 0
Frame 540 detections: 0
Processed frame  540
Frame 541 detections: 0
Frame 542 detections: 0
Frame 543 detections: 0
Frame 544 detections: 0
Frame 545 detections: 0
Frame 546 detections: 0
Frame 547 detections: 0
Frame 548 detections: 0
Frame 549 detections: 0
Frame 550 detections: 0
Processed frame  550
Frame 551 detections: 0
Frame 552 detections: 0
Frame 553 detections: 0
Frame 554 detections: 0
Frame 555 detections: 0
Frame 556 detections: 0
Frame 557 detections: 0
Frame 558 detections: 0
Frame 559 detections: 0
Frame 560 detections: 0
Processed frame  560
Frame 561 detections: 0
Frame 562 detections: 0
Frame 563 detections: 0
Frame 564 detections: 0
Frame 565 detections: 0
Frame 566 detections: 0
Frame 567 detections: 0
Frame 568 detections: 0
Frame 569 detections: 0
Frame 570 detections: 0
Processed frame  570
Frame 571 detections: 0
Frame 572 detections: 0
Frame 573 detections: 0
Frame 574 detections: 0
Frame 575 detections: 0
Frame 576 detections: 0
Frame 577 detections: 0
Frame 578 detections: 0
Frame 579 detections: 0
Frame 580 detections: 0
Processed frame  580
Frame 581 detections: 0
Frame 582 detections: 0
Frame 583 detections: 0
Frame 584 detections: 0
Frame 585 detections: 0
Frame 586 detections: 0
Frame 587 detections: 0
Frame 588 detections: 0
Frame 589 detections: 0
Frame 590 detections: 0
Processed frame  590
Frame 591 detections: 0
Frame 592 detections: 0
Frame 593 detections: 0
Frame 594 detections: 0
Frame 595 detections: 0
Frame 596 detections: 0
Frame 597 detections: 0
Frame 598 detections: 0
Frame 599 detections: 0
Frame 600 detections: 0
Processed frame  600
Frame 601 detections: 0
Frame 602 detections: 0
Frame 603 detections: 0
Frame 604 detections: 0
Frame 605 detections: 0
Frame 606 detections: 0
Frame 607 detections: 0
Frame 608 detections: 0
Frame 609 detections: 0
Frame 610 detections: 0
Processed frame  610
Frame 611 detections: 0
Frame 612 detections: 0
Frame 613 detections: 0
Frame 614 detections: 0
Frame 615 detections: 0
Frame 616 detections: 0
Frame 617 detections: 0
Frame 618 detections: 0
Frame 619 detections: 0
Frame 620 detections: 0
Processed frame  620
Frame 621 detections: 0
Frame 622 detections: 0
Frame 623 detections: 0
Frame 624 detections: 0
Frame 625 detections: 0
Frame 626 detections: 0
Frame 627 detections: 0
Frame 628 detections: 0
Frame 629 detections: 0
Frame 630 detections: 0
Processed frame  630
Frame 631 detections: 0
Frame 632 detections: 0
Frame 633 detections: 0
Frame 634 detections: 0
Frame 635 detections: 0
Frame 636 detections: 0
Frame 637 detections: 0
Frame 638 detections: 0
Frame 639 detections: 0
Frame 640 detections: 0
Processed frame  640
Frame 641 detections: 0
Frame 642 detections: 0
Frame 643 detections: 0
Frame 644 detections: 0
Frame 645 detections: 0
Frame 646 detections: 0
Frame 647 detections: 0
Frame 648 detections: 0
Frame 649 detections: 0
Frame 650 detections: 0
Processed frame  650
Frame 651 detections: 0
Frame 652 detections: 0
Frame 653 detections: 0
Frame 654 detections: 0
Frame 655 detections: 0
Frame 656 detections: 0
Frame 657 detections: 0
Frame 658 detections: 0
Frame 659 detections: 0
Frame 660 detections: 0
Processed frame  660
Frame 661 detections: 0
Frame 662 detections: 0
Frame 663 detections: 0
Frame 664 detections: 0
Frame 665 detections: 0
Frame 666 detections: 0
Frame 667 detections: 0
Frame 668 detections: 0
Frame 669 detections: 0
Frame 670 detections: 0
Processed frame  670
Frame 671 detections: 0
Frame 672 detections: 0
Frame 673 detections: 0
Frame 674 detections: 0
Frame 675 detections: 0
Frame 676 detections: 0
Frame 677 detections: 0
Frame 678 detections: 0
Frame 679 detections: 0
Frame 680 detections: 0
Processed frame  680
Frame 681 detections: 0
Frame 682 detections: 0
Frame 683 detections: 0
Frame 684 detections: 0
Frame 685 detections: 0
Frame 686 detections: 0
Frame 687 detections: 0
Frame 688 detections: 0
Frame 689 detections: 0
Frame 690 detections: 0
Processed frame  690
Frame 691 detections: 0
Frame 692 detections: 0
Frame 693 detections: 0
Frame 694 detections: 0
Frame 695 detections: 0
Frame 696 detections: 0
Frame 697 detections: 0
Frame 698 detections: 0
Frame 699 detections: 0
Frame 700 detections: 0
Processed frame  700
Frame 701 detections: 0
Frame 702 detections: 0
Frame 703 detections: 0
Frame 704 detections: 0
Frame 705 detections: 0
Frame 706 detections: 0
Frame 707 detections: 0
Frame 708 detections: 0
Frame 709 detections: 0
Frame 710 detections: 0
Processed frame  710
Frame 711 detections: 0
Frame 712 detections: 0
Frame 713 detections: 0
Frame 714 detections: 0
Frame 715 detections: 0
Frame 716 detections: 0
Frame 717 detections: 0
Frame 718 detections: 0
Frame 719 detections: 0
Frame 720 detections: 0
Processed frame  720
Frame 721 detections: 0
Frame 722 detections: 0
Frame 723 detections: 0
Frame 724 detections: 0
Frame 725 detections: 0
Frame 726 detections: 0
Frame 727 detections: 0
Frame 728 detections: 0
Frame 729 detections: 0
Frame 730 detections: 0
Processed frame  730
Frame 731 detections: 2
Frame 732 detections: 0
Frame 733 detections: 0
Frame 734 detections: 0
Frame 735 detections: 0
Frame 736 detections: 0
Frame 737 detections: 0
Frame 738 detections: 1
Frame 739 detections: 0
Frame 740 detections: 0
Processed frame  740
Frame 741 detections: 0
Frame 742 detections: 0
Frame 743 detections: 1
Frame 744 detections: 0
Frame 745 detections: 0
Frame 746 detections: 0
Frame 747 detections: 0
Frame 748 detections: 0
Frame 749 detections: 0
Frame 750 detections: 0
Processed frame  750
Frame 751 detections: 0
Frame 752 detections: 0
Frame 753 detections: 0
Frame 754 detections: 0
Frame 755 detections: 0
Frame 756 detections: 0
Frame 757 detections: 0
Frame 758 detections: 0
Frame 759 detections: 0
Frame 760 detections: 0
Processed frame  760
Frame 761 detections: 0
Frame 762 detections: 0
Frame 763 detections: 0
Frame 764 detections: 0
Frame 765 detections: 0
Frame 766 detections: 0
Frame 767 detections: 0
Frame 768 detections: 0
Frame 769 detections: 0
Frame 770 detections: 0
Processed frame  770
Frame 771 detections: 0
Frame 772 detections: 0
Frame 773 detections: 0
Frame 774 detections: 0
Frame 775 detections: 0
Frame 776 detections: 0
Frame 777 detections: 0
Frame 778 detections: 0
Frame 779 detections: 0
Frame 780 detections: 0
Processed frame  780
Frame 781 detections: 0
Frame 782 detections: 0
Frame 783 detections: 0
Frame 784 detections: 0
Frame 785 detections: 0
Frame 786 detections: 0
Frame 787 detections: 0
Frame 788 detections: 0
Frame 789 detections: 0
Frame 790 detections: 0
Processed frame  790
Frame 791 detections: 0
Frame 792 detections: 0
Frame 793 detections: 0
Frame 794 detections: 0
Frame 795 detections: 0
Frame 796 detections: 0
Frame 797 detections: 0
Frame 798 detections: 0
Frame 799 detections: 0
Frame 800 detections: 0
Processed frame  800
Frame 801 detections: 0
Frame 802 detections: 0
Frame 803 detections: 0
Frame 804 detections: 1
Frame 805 detections: 1
Frame 806 detections: 1
Frame 807 detections: 0
Frame 808 detections: 1
Frame 809 detections: 1
Frame 810 detections: 1
Processed frame  810
Frame 811 detections: 0
Frame 812 detections: 0
Frame 813 detections: 0
Frame 814 detections: 0
Frame 815 detections: 0
Frame 816 detections: 0
Frame 817 detections: 0
Frame 818 detections: 0
Frame 819 detections: 0
Frame 820 detections: 0
Processed frame  820
Frame 821 detections: 0
Frame 822 detections: 0
Frame 823 detections: 0
Frame 824 detections: 0
Frame 825 detections: 0
Frame 826 detections: 0
Frame 827 detections: 0
Frame 828 detections: 0
Frame 829 detections: 0
Frame 830 detections: 0
Processed frame  830
Frame 831 detections: 0
Frame 832 detections: 0
Frame 833 detections: 0
Frame 834 detections: 0
Frame 835 detections: 0
Frame 836 detections: 0
Frame 837 detections: 0
Frame 838 detections: 0
Frame 839 detections: 0
Frame 840 detections: 0
Processed frame  840
Frame 841 detections: 0
Frame 842 detections: 0
Frame 843 detections: 0
Frame 844 detections: 0
Frame 845 detections: 0
Frame 846 detections: 0
Frame 847 detections: 0
Frame 848 detections: 0
Frame 849 detections: 0
Frame 850 detections: 0
Processed frame  850
Frame 851 detections: 0
Frame 852 detections: 0
Frame 853 detections: 0
Frame 854 detections: 0
Frame 855 detections: 0
Frame 856 detections: 0
Frame 857 detections: 0
Frame 858 detections: 0
Frame 859 detections: 0
Frame 860 detections: 0
Processed frame  860
Frame 861 detections: 0
Frame 862 detections: 0
Frame 863 detections: 0
Frame 864 detections: 0
Frame 865 detections: 0
Frame 866 detections: 0
Frame 867 detections: 0
Frame 868 detections: 0
Frame 869 detections: 0
Frame 870 detections: 0
Processed frame  870
Frame 871 detections: 0
Frame 872 detections: 0
Frame 873 detections: 0
Frame 874 detections: 1
Frame 875 detections: 1
Frame 876 detections: 1
Frame 877 detections: 1
Frame 878 detections: 0
Frame 879 detections: 2
Frame 880 detections: 0
Processed frame  880
Frame 881 detections: 0
Frame 882 detections: 0
Frame 883 detections: 0
Frame 884 detections: 0
Frame 885 detections: 0
Frame 886 detections: 0
Frame 887 detections: 0
Frame 888 detections: 0
Frame 889 detections: 0
Frame 890 detections: 0
Processed frame  890
Frame 891 detections: 0
Frame 892 detections: 0
Frame 893 detections: 0
Frame 894 detections: 0
Frame 895 detections: 0
Frame 896 detections: 0
Frame 897 detections: 0
Frame 898 detections: 0
Frame 899 detections: 0
Frame 900 detections: 0
Processed frame  900
Frame 901 detections: 0
Frame 902 detections: 0
Frame 903 detections: 0
Frame 904 detections: 0
Frame 905 detections: 0
Frame 906 detections: 0
Frame 907 detections: 0
Frame 908 detections: 0
Frame 909 detections: 0
Frame 910 detections: 0
Processed frame  910
Frame 911 detections: 0
Frame 912 detections: 0
Frame 913 detections: 0
Frame 914 detections: 0
Frame 915 detections: 0
Frame 916 detections: 0
Frame 917 detections: 0
Frame 918 detections: 0
Frame 919 detections: 0
Frame 920 detections: 0
Processed frame  920
Frame 921 detections: 0
Frame 922 detections: 0
Frame 923 detections: 0
Frame 924 detections: 0
Frame 925 detections: 0
Frame 926 detections: 0
Frame 927 detections: 0
Frame 928 detections: 0
Frame 929 detections: 0
Frame 930 detections: 0
Processed frame  930
Frame 931 detections: 0
Frame 932 detections: 0
Frame 933 detections: 0
Frame 934 detections: 0
Frame 935 detections: 0
Frame 936 detections: 0
Frame 937 detections: 0
Frame 938 detections: 0
Frame 939 detections: 0
Frame 940 detections: 0
Processed frame  940
Frame 941 detections: 0
Frame 942 detections: 0
Frame 943 detections: 0
Frame 944 detections: 0
Frame 945 detections: 0
Frame 946 detections: 1
Frame 947 detections: 1
Frame 948 detections: 1
Frame 949 detections: 0
Frame 950 detections: 0
Processed frame  950
Frame 951 detections: 0
Frame 952 detections: 0
Frame 953 detections: 0
Frame 954 detections: 0
Frame 955 detections: 0
Frame 956 detections: 0
Frame 957 detections: 0
Frame 958 detections: 0
Frame 959 detections: 1
Frame 960 detections: 1
Processed frame  960
Frame 961 detections: 1
Frame 962 detections: 0
Frame 963 detections: 1
Frame 964 detections: 0
Frame 965 detections: 0
Frame 966 detections: 0
Frame 967 detections: 1
Frame 968 detections: 0
Frame 969 detections: 0
Frame 970 detections: 0
Processed frame  970
Frame 971 detections: 0
Frame 972 detections: 0
Frame 973 detections: 0
Frame 974 detections: 0
Frame 975 detections: 0
Frame 976 detections: 0
Frame 977 detections: 0
Frame 978 detections: 0
Frame 979 detections: 0
Frame 980 detections: 0
Processed frame  980
Frame 981 detections: 1
Frame 982 detections: 0
Frame 983 detections: 0
Frame 984 detections: 0
Frame 985 detections: 0
Frame 986 detections: 0
Frame 987 detections: 0
Frame 988 detections: 0
Frame 989 detections: 0
Frame 990 detections: 0
Processed frame  990
Frame 991 detections: 0
Frame 992 detections: 0
Frame 993 detections: 0
Frame 994 detections: 0
Frame 995 detections: 0
Frame 996 detections: 0
Frame 997 detections: 0
Frame 998 detections: 0
Frame 999 detections: 0
Frame 1000 detections: 0
Processed frame  1000
Frame 1001 detections: 0
Frame 1002 detections: 0
Frame 1003 detections: 0
Frame 1004 detections: 0
Frame 1005 detections: 0
Frame 1006 detections: 0
Frame 1007 detections: 0
Frame 1008 detections: 0
Frame 1009 detections: 0
Frame 1010 detections: 0
Processed frame  1010
Frame 1011 detections: 0
Frame 1012 detections: 0
Frame 1013 detections: 0
Frame 1014 detections: 0
Frame 1015 detections: 0
Frame 1016 detections: 0
Frame 1017 detections: 0
Frame 1018 detections: 0
Frame 1019 detections: 0
Frame 1020 detections: 0
Processed frame  1020
Frame 1021 detections: 0
Frame 1022 detections: 0
Frame 1023 detections: 0
Frame 1024 detections: 0
Frame 1025 detections: 0
Frame 1026 detections: 0
Frame 1027 detections: 0
Frame 1028 detections: 0
Frame 1029 detections: 0
Frame 1030 detections: 0
Processed frame  1030
Frame 1031 detections: 0
Frame 1032 detections: 0
Frame 1033 detections: 0
Frame 1034 detections: 0
Frame 1035 detections: 0
Frame 1036 detections: 0
Frame 1037 detections: 0
Frame 1038 detections: 1
Frame 1039 detections: 0
Frame 1040 detections: 0
Processed frame  1040
Frame 1041 detections: 0
Frame 1042 detections: 0
Frame 1043 detections: 0
Frame 1044 detections: 0
Frame 1045 detections: 0
Frame 1046 detections: 0
Frame 1047 detections: 0
Frame 1048 detections: 0
Frame 1049 detections: 0
Frame 1050 detections: 0
Processed frame  1050
Frame 1051 detections: 0
Frame 1052 detections: 0
Frame 1053 detections: 0
Frame 1054 detections: 0
Frame 1055 detections: 0
Frame 1056 detections: 0
Frame 1057 detections: 0
Frame 1058 detections: 0
Frame 1059 detections: 0
Frame 1060 detections: 0
Processed frame  1060
Frame 1061 detections: 0
Frame 1062 detections: 0
Frame 1063 detections: 0
Frame 1064 detections: 0
Frame 1065 detections: 0
Frame 1066 detections: 0
Frame 1067 detections: 0
Frame 1068 detections: 0
Frame 1069 detections: 0
Frame 1070 detections: 0
Processed frame  1070
Frame 1071 detections: 0
Frame 1072 detections: 0
Frame 1073 detections: 0
Frame 1074 detections: 0
Frame 1075 detections: 0
Frame 1076 detections: 0
Frame 1077 detections: 0
Frame 1078 detections: 0
Frame 1079 detections: 0
Frame 1080 detections: 0
Processed frame  1080
Frame 1081 detections: 0
Frame 1082 detections: 0
Frame 1083 detections: 0
Frame 1084 detections: 0
Frame 1085 detections: 0
Frame 1086 detections: 0
Frame 1087 detections: 0
Frame 1088 detections: 0
Frame 1089 detections: 0
Frame 1090 detections: 0
Processed frame  1090
Frame 1091 detections: 0
Frame 1092 detections: 0
Frame 1093 detections: 0
Frame 1094 detections: 0
Frame 1095 detections: 0
Frame 1096 detections: 0
Frame 1097 detections: 0
Frame 1098 detections: 0
Frame 1099 detections: 0
Frame 1100 detections: 0
Processed frame  1100
Frame 1101 detections: 0
Frame 1102 detections: 0
Frame 1103 detections: 0
Frame 1104 detections: 0
Frame 1105 detections: 0
Frame 1106 detections: 0
Frame 1107 detections: 0
Frame 1108 detections: 0
Frame 1109 detections: 0
Frame 1110 detections: 0
Processed frame  1110
Frame 1111 detections: 0
Frame 1112 detections: 0
Frame 1113 detections: 0
Frame 1114 detections: 0
Frame 1115 detections: 0
Frame 1116 detections: 1
Frame 1117 detections: 0
Frame 1118 detections: 0
Frame 1119 detections: 0
Frame 1120 detections: 0
Processed frame  1120
Frame 1121 detections: 0
Frame 1122 detections: 0
Frame 1123 detections: 0
Frame 1124 detections: 0
Frame 1125 detections: 0
Frame 1126 detections: 0
Frame 1127 detections: 0
Frame 1128 detections: 0
Frame 1129 detections: 0
Frame 1130 detections: 0
Processed frame  1130
Frame 1131 detections: 0
Frame 1132 detections: 0
Frame 1133 detections: 0
Frame 1134 detections: 0
Frame 1135 detections: 0
Frame 1136 detections: 0
Frame 1137 detections: 0
Frame 1138 detections: 0
Frame 1139 detections: 0
Frame 1140 detections: 0
Processed frame  1140
Frame 1141 detections: 0
Frame 1142 detections: 0
Frame 1143 detections: 0
Frame 1144 detections: 0
Frame 1145 detections: 0
Frame 1146 detections: 0
Frame 1147 detections: 0
Frame 1148 detections: 0
Frame 1149 detections: 0
Frame 1150 detections: 0
Processed frame  1150
Frame 1151 detections: 0
Frame 1152 detections: 0
Frame 1153 detections: 0
Frame 1154 detections: 0
Frame 1155 detections: 0
Frame 1156 detections: 0
Frame 1157 detections: 0
Frame 1158 detections: 0
Frame 1159 detections: 0
Frame 1160 detections: 0
Processed frame  1160
Frame 1161 detections: 0
Frame 1162 detections: 0
Frame 1163 detections: 0
Frame 1164 detections: 0
Frame 1165 detections: 0
Frame 1166 detections: 0
Frame 1167 detections: 0
Frame 1168 detections: 0
Frame 1169 detections: 0
Frame 1170 detections: 0
Processed frame  1170
Frame 1171 detections: 0
Frame 1172 detections: 0
Frame 1173 detections: 0
Frame 1174 detections: 0
Frame 1175 detections: 0
Frame 1176 detections: 0
Frame 1177 detections: 0
Frame 1178 detections: 0
Frame 1179 detections: 0
Frame 1180 detections: 0
Processed frame  1180
Frame 1181 detections: 0
Frame 1182 detections: 0
Frame 1183 detections: 0
Frame 1184 detections: 0
Frame 1185 detections: 0
Frame 1186 detections: 0
Frame 1187 detections: 0
Frame 1188 detections: 0
Frame 1189 detections: 0
Frame 1190 detections: 0
Processed frame  1190
Frame 1191 detections: 0
Frame 1192 detections: 0
Frame 1193 detections: 0
Frame 1194 detections: 0
Frame 1195 detections: 0
Frame 1196 detections: 0
Frame 1197 detections: 0
Frame 1198 detections: 0
Frame 1199 detections: 0
Frame 1200 detections: 0
Processed frame  1200
Frame 1201 detections: 0
Frame 1202 detections: 0
Frame 1203 detections: 0
Frame 1204 detections: 0
Frame 1205 detections: 0
Frame 1206 detections: 0
Frame 1207 detections: 0
Frame 1208 detections: 0
Frame 1209 detections: 0
Frame 1210 detections: 0
Processed frame  1210
Frame 1211 detections: 0
Frame 1212 detections: 0
Frame 1213 detections: 0
Frame 1214 detections: 0
Frame 1215 detections: 0
Frame 1216 detections: 0
Frame 1217 detections: 0
Frame 1218 detections: 0
Frame 1219 detections: 0
Frame 1220 detections: 0
Processed frame  1220
Frame 1221 detections: 0
Frame 1222 detections: 0
Frame 1223 detections: 0
Frame 1224 detections: 0
Frame 1225 detections: 0
Frame 1226 detections: 0
Frame 1227 detections: 0
Frame 1228 detections: 0
Frame 1229 detections: 0
Frame 1230 detections: 0
Processed frame  1230
Frame 1231 detections: 0
Frame 1232 detections: 0
Frame 1233 detections: 0
Frame 1234 detections: 0
Frame 1235 detections: 0
Frame 1236 detections: 0
Frame 1237 detections: 0
Frame 1238 detections: 0
Frame 1239 detections: 0
Frame 1240 detections: 0
Processed frame  1240
Frame 1241 detections: 0
Frame 1242 detections: 0
Frame 1243 detections: 0
Frame 1244 detections: 0
Frame 1245 detections: 0
Frame 1246 detections: 0
Frame 1247 detections: 0
Frame 1248 detections: 0
Frame 1249 detections: 0
Frame 1250 detections: 0
Processed frame  1250
Frame 1251 detections: 0
Frame 1252 detections: 0
Frame 1253 detections: 0
Frame 1254 detections: 0
Frame 1255 detections: 0
Frame 1256 detections: 0
Frame 1257 detections: 0
Frame 1258 detections: 0
Frame 1259 detections: 0
Frame 1260 detections: 0
Processed frame  1260
Processing times: total - 331.4441704750061   average - 0.26305092894841753   max - 0.5866920948028564 
Releasing everything